Copy the domain's rid_bits field into struct arch_vcpu, so the vmx fast path can load it directly from the vcpu instead of dereferencing struct domain.
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
DEFINE(IA64_VCPU_INSVC3_OFFSET, offsetof (struct vcpu, arch.insvc[3]));
DEFINE(IA64_VCPU_STARTING_RID_OFFSET, offsetof (struct vcpu, arch.starting_rid));
DEFINE(IA64_VCPU_ENDING_RID_OFFSET, offsetof (struct vcpu, arch.ending_rid));
+ DEFINE(IA64_VCPU_RID_BITS_OFFSET, offsetof (struct vcpu, arch.rid_bits));
DEFINE(IA64_VCPU_DOMAIN_ITM_OFFSET, offsetof (struct vcpu, arch.domain_itm));
DEFINE(IA64_VCPU_DOMAIN_ITM_LAST_OFFSET, offsetof (struct vcpu, arch.domain_itm_last));
DEFINE(IA64_VCPU_ITLB_OFFSET, offsetof (struct vcpu, arch.itlb));
BLANK();
DEFINE(IA64_DOMAIN_SHADOW_BITMAP_OFFSET, offsetof (struct domain, arch.shadow_bitmap));
- DEFINE(IA64_DOMAIN_RID_BITS_OFFSET, offsetof (struct domain, arch.rid_bits));
BLANK();
#ifndef ACCE_MOV_TO_RR
br.many vmx_virtualization_fault_back
#endif
- add r22=IA64_VCPU_DOMAIN_OFFSET,r21
+ add r22=IA64_VCPU_RID_BITS_OFFSET,r21 // r21 = vcpu (IA64_VCPU_* offsets); r22 -> v->arch.rid_bits
extr.u r16=r25,20,7 // r3
extr.u r17=r25,13,7 // r2
;;
- ld8 r22=[r22] // Get domain
movl r20=asm_mov_from_reg
;;
adds r30=vmx_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
vmx_asm_mov_to_rr_back_2:
adds r30=vmx_resume_to_guest-asm_mov_from_reg,r20
shladd r27=r23,3,r27 // address of VRR
- add r22=IA64_DOMAIN_RID_BITS_OFFSET,r22
;;
ld1 r22=[r22] // Load rid_bits from vcpu (1-byte load: arch.rid_bits is unsigned char)
mov b0=r18 // restore b0
v->arch.starting_rid = d->arch.starting_rid;
v->arch.ending_rid = d->arch.ending_rid;
+ v->arch.rid_bits = d->arch.rid_bits;
v->arch.breakimm = d->arch.breakimm;
v->arch.last_processor = INVALID_PROCESSOR;
v->arch.vhpt_pg_shift = PAGE_SHIFT;
int breakimm; // from arch_domain (so is pinned)
int starting_rid; /* first RID assigned to domain */
int ending_rid; /* one beyond highest RID assigned to domain */
+ unsigned char rid_bits; // from arch_domain (so is pinned)
/* Bitset for debug register use. */
unsigned int dbg_used;